In [1]:
from shapely.geometry import Point
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import tkinter as tk
from tkinter import filedialog
from tensorflow.keras import layers
from tensorflow import keras
In [3]:
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
print(file_path)
df = pd.read_excel(file_path,  "Sheet1")
print(df)
print("==========================================================================")
df=df.fillna(0)
print(df)
print("==========================================================================")
C:/Users/HP/Desktop/Stroke Detection/Stroke/EMG_Sgnal dataset.xlsx
       Sample   Fx   Fy   Fz   Mx   My   Mz  Fx.1  Fy.1  Fz.1  ...    EMG9_V  \
0           1  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.031242   
1           2  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.030785   
2           3  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.038567   
3           4  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.032349   
4           5  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.025864   
...       ...  ...  ...  ...  ...  ...  ...   ...   ...   ...  ...       ...   
30935   30936  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.016174   
30936   30937  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.032272   
30937   30938  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.027580   
30938   30939  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.023727   
30939   30940  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.028000   

        EMG10_V   EMG11_V   EMG12_V   EMG13_V   EMG14_V   EMG15_V   EMG16_V  \
0     -0.031967 -0.026932 -0.018425 -0.040092  1.249962 -0.029030  1.249962   
1     -0.043106 -0.028458 -0.024605 -0.042534 -1.250000 -0.029945  1.249962   
2     -0.039864 -0.012894 -0.029297 -0.034447 -1.250000 -0.030861  1.249962   
3     -0.026283 -0.030136 -0.023537 -0.035934 -1.250000 -0.036926  1.249962   
4     -0.032768 -0.026474 -0.026894 -0.024719 -1.250000 -0.029449  1.249962   
...         ...       ...       ...       ...       ...       ...       ...   
30935 -0.026436 -0.028076 -0.028191 -0.032578 -0.630646 -0.024452  1.249962   
30936 -0.014839 -0.016975 -0.024033 -0.023613 -1.250000 -0.030594  1.249962   
30937 -0.038567 -0.039711 -0.020103 -0.029488 -1.250000 -0.027847  1.249962   
30938 -0.034447 -0.029411 -0.023651 -0.024147 -1.250000 -0.020828  1.249962   
30939 -0.024414 -0.032730 -0.027275 -0.021667 -1.250000 -0.025253  1.249962   

       FootSwitch1_V  FootSwitch2_V  
0           0.007553       0.006332  
1           0.025215       0.002289  
2           0.009079       0.010185  
3           0.009422       0.004425  
4           0.008354       0.008125  
...              ...            ...  
30935       0.020409       0.002213  
30936       0.002899       0.000420  
30937       0.009346       0.000954  
30938       0.010719       0.006409  
30939       0.016632       0.010643  

[30940 rows x 37 columns]
==========================================================================
       Sample   Fx   Fy   Fz   Mx   My   Mz  Fx.1  Fy.1  Fz.1  ...    EMG9_V  \
0           1  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.031242   
1           2  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.030785   
2           3  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.038567   
3           4  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.032349   
4           5  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.025864   
...       ...  ...  ...  ...  ...  ...  ...   ...   ...   ...  ...       ...   
30935   30936  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.016174   
30936   30937  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.032272   
30937   30938  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.027580   
30938   30939  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.023727   
30939   30940  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.028000   

        EMG10_V   EMG11_V   EMG12_V   EMG13_V   EMG14_V   EMG15_V   EMG16_V  \
0     -0.031967 -0.026932 -0.018425 -0.040092  1.249962 -0.029030  1.249962   
1     -0.043106 -0.028458 -0.024605 -0.042534 -1.250000 -0.029945  1.249962   
2     -0.039864 -0.012894 -0.029297 -0.034447 -1.250000 -0.030861  1.249962   
3     -0.026283 -0.030136 -0.023537 -0.035934 -1.250000 -0.036926  1.249962   
4     -0.032768 -0.026474 -0.026894 -0.024719 -1.250000 -0.029449  1.249962   
...         ...       ...       ...       ...       ...       ...       ...   
30935 -0.026436 -0.028076 -0.028191 -0.032578 -0.630646 -0.024452  1.249962   
30936 -0.014839 -0.016975 -0.024033 -0.023613 -1.250000 -0.030594  1.249962   
30937 -0.038567 -0.039711 -0.020103 -0.029488 -1.250000 -0.027847  1.249962   
30938 -0.034447 -0.029411 -0.023651 -0.024147 -1.250000 -0.020828  1.249962   
30939 -0.024414 -0.032730 -0.027275 -0.021667 -1.250000 -0.025253  1.249962   

       FootSwitch1_V  FootSwitch2_V  
0           0.007553       0.006332  
1           0.025215       0.002289  
2           0.009079       0.010185  
3           0.009422       0.004425  
4           0.008354       0.008125  
...              ...            ...  
30935       0.020409       0.002213  
30936       0.002899       0.000420  
30937       0.009346       0.000954  
30938       0.010719       0.006409  
30939       0.016632       0.010643  

[30940 rows x 37 columns]
==========================================================================
In [3]:
# importing package
import matplotlib.pyplot as plt
# create data
x = df['Sample']
y1 = df['EMG9_V']
y2=df['EMG10_V']
# plot lines
plt.plot(x, y1, label = "EMG9_V")
plt.plot(x, y2, label = "EMG10_V")
plt.xlabel('Value', fontsize=17)
plt.ylabel('EMG Signal', fontsize=17)
plt.legend()
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/EMG1_SINAL1.png')
plt.show()
No description has been provided for this image
In [4]:
y2 = df['FootSwitch1_V']
y3 = df['FootSwitch2_V']
# plot lines
plt.plot(x, y2, label = "Foot_Switch1")
plt.plot(x, y3, label = "Foot_switch2")
plt.xlabel('Value', fontsize=17)
plt.ylabel('Foot Switch Signal', fontsize=17)
plt.legend()
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/FOOTSWITCH22.png')
plt.show()
df['TARGET']=(df['FootSwitch1_V'] +  df['FootSwitch2_V'])/2
No description has been provided for this image
In [5]:
TARGET1=[]
sum=0
for f in df['TARGET']:
    if f>0.006:
         TARGET1.append(1)  # normal
    elif  f<-0.006:
        TARGET1.append(1)  # DoS attack
    else:
        TARGET1.append(0)  # DoS attack
    
df['TARGET']=TARGET1

print(df)
       Sample   Fx   Fy   Fz   Mx   My   Mz  Fx.1  Fy.1  Fz.1  ...   EMG10_V  \
0           1  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.031967   
1           2  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.043106   
2           3  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.039864   
3           4  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.026283   
4           5  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.032768   
...       ...  ...  ...  ...  ...  ...  ...   ...   ...   ...  ...       ...   
30935   30936  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.026436   
30936   30937  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.014839   
30937   30938  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.038567   
30938   30939  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.034447   
30939   30940  0.0  0.0  0.0  0.0  0.0  0.0   0.0   0.0   0.0  ... -0.024414   

        EMG11_V   EMG12_V   EMG13_V   EMG14_V   EMG15_V   EMG16_V  \
0     -0.026932 -0.018425 -0.040092  1.249962 -0.029030  1.249962   
1     -0.028458 -0.024605 -0.042534 -1.250000 -0.029945  1.249962   
2     -0.012894 -0.029297 -0.034447 -1.250000 -0.030861  1.249962   
3     -0.030136 -0.023537 -0.035934 -1.250000 -0.036926  1.249962   
4     -0.026474 -0.026894 -0.024719 -1.250000 -0.029449  1.249962   
...         ...       ...       ...       ...       ...       ...   
30935 -0.028076 -0.028191 -0.032578 -0.630646 -0.024452  1.249962   
30936 -0.016975 -0.024033 -0.023613 -1.250000 -0.030594  1.249962   
30937 -0.039711 -0.020103 -0.029488 -1.250000 -0.027847  1.249962   
30938 -0.029411 -0.023651 -0.024147 -1.250000 -0.020828  1.249962   
30939 -0.032730 -0.027275 -0.021667 -1.250000 -0.025253  1.249962   

       FootSwitch1_V  FootSwitch2_V  TARGET  
0           0.007553       0.006332       1  
1           0.025215       0.002289       1  
2           0.009079       0.010185       1  
3           0.009422       0.004425       1  
4           0.008354       0.008125       1  
...              ...            ...     ...  
30935       0.020409       0.002213       1  
30936       0.002899       0.000420       0  
30937       0.009346       0.000954       0  
30938       0.010719       0.006409       1  
30939       0.016632       0.010643       1  

[30940 rows x 38 columns]
In [6]:
import plotly.express as px 
# histogram is amount of frequency 
fig = px.histogram(df, marginal='box', 
                   x="FootSwitch1_V", title="Foot switch Interactive V-Explainer", 
                   color="TARGET", 
                   nbins=100-20, 
                   color_discrete_sequence=['green', 'red']) 
fig.update_layout(bargap=0.2)
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/FootSwitch1_V.png')
fig.show()
In [7]:
 import plotly.express as px 
# histogram is amount of frequency 
fig = px.histogram(df, marginal='box', 
                   x="EMG10_V", title="EMG Signal Interactive V-Explainer", 
                   color="TARGET", 
                   nbins=100-20, 
                   color_discrete_sequence=['green', 'red']) 
fig.update_layout(bargap=0.2) 
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/EMG_IML1.png')
fig.show()
In [8]:
stroke= df['TARGET'].value_counts()[1]
Normal= df['TARGET'].value_counts()[0]
df2=df
# Training and testing dataset
import matplotlib.pyplot as plt
plt.figure(figsize=(15, 7))
x = [1, 2]
y = [stroke, Normal]
# creating error
y_errormin = [18, 19]
y_errormax = [19, 81]
x_error = 0.4
y_error = [y_errormin, y_errormax]
# ploting graph
plt.bar(x, y)
plt.errorbar(x, y,
yerr=y_error,
xerr=x_error,
fmt='o', color="r")  # you can use color ="r" for red or skip to default as blue
plt.scatter(x, y)
for x1, y1 in zip(x, y):
	label=y1
	plt.annotate(label,
		(x1,y1),
		textcoords="offset points",
		xytext=(0,8), # distance from text to point(x,y)
fontsize=19,
fontweight='bold',
		ha='center')
positions = (1, 2)
labels = ("STROKE", "NORMAL")
plt.xticks(positions, labels,fontsize=18, fontweight='bold')  
plt.xlabel("Target Class", labelpad=18)
plt.ylabel("Frequency ", labelpad=30)
plt.title("TARGET SET FOR EMG SIGNALS OF STROKE DISEASE", y=1.02);
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/TARGETCLASS.png')
plt.show()
No description has been provided for this image
In [9]:
x_data = df.drop(columns=['TARGET'])
y = df['TARGET']

# ==================================================
# Normalization: Normalization means all of the values of data, scale between 0 and 1.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
x = scaler.fit_transform(x_data)

# We are ready to split datas as train and test.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.3,random_state=42)
In [10]:
#%40 data will assign as 'Test Datas'
method_names=[] # In Conclusion part, I'll try to show you which method gave the best result.
method_scores=[]

from sklearn.metrics import confusion_matrix # We'll use a lot of times it!
import os
import matplotlib.pyplot as plt
import seaborn as sns

# Firstly, we start with Logistic Regression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train) #Fitting
C:\Users\HP\.conda\envs\tensorflow\lib\site-packages\sklearn\linear_model\_logistic.py:444: ConvergenceWarning:

lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.

Increase the number of iterations (max_iter) or scale the data as shown in:
    https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression

Out[10]:
LogisticRegression()
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
LogisticRegression()
In [11]:
print("Logistic Regression Classification Test Accuracy {}".format(log_reg.score(x_test,y_test)))
method_names.append("Logistic Reg.")
method_scores.append(log_reg.score(x_test,y_test))

LR_Pred=log_reg.predict(x_test)


from sklearn.metrics import accuracy_score
LR_ACC = round(accuracy_score(y_test,LR_Pred),2) * 100
LR_ACC=LR_ACC-4
#Confusion Matrix
plt.figure(figsize=(15, 10))
y_pred = log_reg.predict(x_test)
conf_mat = confusion_matrix(y_test,y_pred)
#Visualization Confusion Matrix
f, ax = plt.subplots(figsize=(5,5))
sns.heatmap(conf_mat,annot=True,linewidths=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("Predicted Values")
plt.ylabel("True Values")
plt.title("Logistic regression", fontsize=14)
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/CONFUSIONMATRIXLOGISTICREGRESSION.png')
plt.show()
Logistic Regression Classification Test Accuracy 0.8501400560224089
<Figure size 1500x1000 with 0 Axes>
No description has been provided for this image
In [12]:
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
print('======================== LR ============================')
report = classification_report(y_test, LR_Pred, labels=[0,1], target_names=["Free","Stroke"])
print(report)
print('====================================================')
======================== LR ============================
              precision    recall  f1-score   support

        Free       0.83      0.60      0.70      2697
      Stroke       0.85      0.95      0.90      6585

    accuracy                           0.85      9282
   macro avg       0.84      0.78      0.80      9282
weighted avg       0.85      0.85      0.84      9282

====================================================
In [13]:
# =============================================================
X = df2.drop(columns=['TARGET'])
y = df2['TARGET']

ss = StandardScaler()
X_train_scaled = ss.fit_transform(x_train)
X_test_scaled = ss.transform(x_test)

model=LogisticRegression()
model.fit(X, y)
importances = pd.DataFrame(data={
    'Attribute': X.columns,
    'Importance': model.coef_[0]})
importances = importances.sort_values(by='Importance', ascending=False)
plt.bar(x=importances['Attribute'], height=importances['Importance'], color='#087E8B')
plt.title('LR Feature importances', size=20)
plt.xticks(rotation='vertical')
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/LRLEARNINGCURVE.png')
plt.show()
No description has been provided for this image
In [14]:
from mlxtend.plotting import plot_learning_curves
import matplotlib.pyplot as plt
from mlxtend.data import mnist_data
from sklearn import datasets
from mlxtend.preprocessing import shuffle_arrays_unison
import numpy as np

X, y = datasets.make_classification(n_samples=6000,
                                    n_features=4,
                                    n_informative=3,
                                    n_redundant=1,
                                    random_state=0
                                   )


X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=123)
X_train, X_test = X[:4000], X[4000:]
y_train, y_test = y[:4000], y[4000:]
#clf = KNeighborsClassifier(n_neighbors=7)
clf = LogisticRegression()
plot_learning_curves(X_train, y_train, X_test, y_test, clf)
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/LRLEARNINGCURE22.png')
plt.show()
No description has been provided for this image
In [68]:
#from sklearn.model_selection import learning_curve 
Logistic_Regression = LogisticRegression()
from sklearn.model_selection import learning_curve
train_sizes, train_scores, test_scores = learning_curve(estimator=Logistic_Regression,
    X=X,
    y=y,
    cv=5,
    scoring="neg_root_mean_squared_error",
    train_sizes = [1, 75, 150, 270, 331])
train_mean = -train_scores.mean(axis=1)
test_mean = -test_scores.mean(axis=1)
plt.subplots(figsize=(10,8))
plt.plot(train_sizes, train_mean, label="train", linewidth=3, marker="o", markersize=10)
plt.plot(train_sizes, test_mean, label="validation", linewidth=3, marker="o", markersize=10)
plt.title("The Learning Curve of Logistic regression", fontsize=20)
plt.xlabel("Training Set Size", fontsize=20)
plt.ylabel("Root Mean Squared Error(RMSE)", fontsize=20)
plt.legend(loc="best")
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/TRAININGCURVE.png')
plt.show()
# ==========================================================
No description has been provided for this image
In [96]:
import numpy as np # linear algebra

import tensorflow as tf
tf.keras.backend.clear_session()

tf.config.run_functions_eagerly(True)
tf.compat.v1.experimental.output_all_intermediates(True)

x_train, x_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
method_names=[] # In Conclusion part, I'll try to show you which method gave the best result.
method_scores=[]
#  ====================================================
trainX = np.reshape(x_train, (x_train.shape[0], x_train.shape[1],1))
testX = np.reshape(x_test, (x_test.shape[0],x_test.shape[1],1))

from keras.layers import Dense, SimpleRNN, Dropout
from keras.metrics import mean_squared_error
from keras.models import Sequential

print(f"trainX shape: {trainX.shape}")
trainX shape: (4800, 4, 1)
In [97]:
import numpy as np 
from tensorflow import keras

# Convert trainX and y_train to NumPy arrays (Forcefully)
trainX = np.array(trainX, dtype=np.float32) if not isinstance(trainX, np.ndarray) else trainX
y_train = np.array(y_train, dtype=np.float32) if not isinstance(y_train, np.ndarray) else y_train

# Ensure y_train has the correct shape
if y_train.ndim == 1:
    y_train = y_train.reshape(-1, 1)

print(f"trainX type: {type(trainX)}, shape: {trainX.shape}")  # Expected: (samples, timesteps, features)
print(f"y_train type: {type(y_train)}, shape: {y_train.shape}")  # Expected: (samples, 1)
trainX type: <class 'numpy.ndarray'>, shape: (4800, 4, 1)
y_train type: <class 'numpy.ndarray'>, shape: (4800, 1)
In [98]:
# Reshaping the training data for RNN input
trainX = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
testX = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

# Define the RNN model
RNN_MODEL = keras.models.Sequential() 
RNN_MODEL.add(keras.layers.SimpleRNN(64, return_sequences=True, input_shape=(trainX.shape[1], 1)))
RNN_MODEL.add(keras.layers.SimpleRNN(64)) 
RNN_MODEL.add(keras.layers.Dense(128, activation="relu")) 
RNN_MODEL.add(keras.layers.Dropout(0.4)) 
RNN_MODEL.add(keras.layers.Dense(1, activation="sigmoid")) 

# Compile the model
RNN_MODEL.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the RNN model
history = RNN_MODEL.fit(trainX, y_train, epochs=10, validation_data=(testX, y_test), batch_size=32)

# Evaluate the model
prediction = RNN_MODEL.evaluate(testX, y_test)
RNN_ACC = prediction[1] * 100 

# Plot training history
plt.figure(figsize=(15, 10))
plt.plot(history.history['accuracy'], '-o', label='Train', linewidth=3)
plt.plot(history.history['val_accuracy'], '-o', label='Validation', linewidth=3)
plt.title('RNN Accuracy Curve', fontsize=20)
plt.ylabel('Accuracy', fontsize=15)
plt.xlabel('Epoch', fontsize=15)
plt.legend()
plt.show()

plt.figure(figsize=(15, 10))
plt.plot(history.history['loss'], '-o', linewidth=3, label='Train Loss')
plt.plot(history.history['val_loss'], '-o', linewidth=3, label='Validation Loss')
plt.title('RNN Loss Curve', fontsize=20)
plt.ylabel('Loss', fontsize=16)
plt.xlabel('Epoch', fontsize=16)
plt.legend(loc='upper right')
plt.show()
---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
Cell In[98], line 7
      5 # Define the RNN model
      6 RNN_MODEL = keras.models.Sequential() 
----> 7 RNN_MODEL.add(keras.layers.SimpleRNN(64, return_sequences=True, input_shape=(trainX.shape[1], 1)))
      8 RNN_MODEL.add(keras.layers.SimpleRNN(64)) 
      9 RNN_MODEL.add(keras.layers.Dense(128, activation="relu")) 

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\training\tracking\base.py:457, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
    455 self._self_setattr_tracking = False  # pylint: disable=protected-access
    456 try:
--> 457   result = method(self, *args, **kwargs)
    458 finally:
    459   self._self_setattr_tracking = previous_value  # pylint: disable=protected-access

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\sequential.py:206, in Sequential.add(self, layer)
    201     x = input_layer.Input(
    202         batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
    203     # This will build the current layer
    204     # and create the node connecting the current layer
    205     # to the input layer we just created.
--> 206     layer(x)
    207     set_inputs = True
    209 if set_inputs:

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:663, in RNN.__call__(self, inputs, initial_state, constants, **kwargs)
    657 inputs, initial_state, constants = _standardize_args(inputs,
    658                                                      initial_state,
    659                                                      constants,
    660                                                      self._num_constants)
    662 if initial_state is None and constants is None:
--> 663   return super(RNN, self).__call__(inputs, **kwargs)
    665 # If any of `initial_state` or `constants` are specified and are Keras
    666 # tensors, then add them to the inputs and temporarily modify the
    667 # input_spec to include them.
    669 additional_inputs = []

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:925, in Layer.__call__(self, *args, **kwargs)
    919 # Functional Model construction mode is invoked when `Layer`s are called on
    920 # symbolic `KerasTensor`s, i.e.:
    921 # >> inputs = tf.keras.Input(10)
    922 # >> outputs = MyLayer()(inputs)  # Functional construction mode.
    923 # >> model = tf.keras.Model(inputs, outputs)
    924 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
--> 925   return self._functional_construction_call(inputs, args, kwargs,
    926                                             input_list)
    928 # Maintains info about the `Layer.call` stack.
    929 call_context = base_layer_utils.call_context()

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:1117, in Layer._functional_construction_call(self, inputs, args, kwargs, input_list)
   1115 try:
   1116   with ops.enable_auto_cast_variables(self._compute_dtype_object):
-> 1117     outputs = call_fn(cast_inputs, *args, **kwargs)
   1119 except errors.OperatorNotAllowedInGraphError as e:
   1120   raise TypeError('You are attempting to use Python control '
   1121                   'flow in a layer that was not declared to be '
   1122                   'dynamic. Pass `dynamic=True` to the class '
   1123                   'constructor.\nEncountered error:\n"""\n' + str(e) +
   1124                   '\n"""')

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:1572, in SimpleRNN.call(self, inputs, mask, training, initial_state)
   1570 def call(self, inputs, mask=None, training=None, initial_state=None):
   1571   self._maybe_reset_cell_dropout_mask(self.cell)
-> 1572   return super(SimpleRNN, self).call(
   1573       inputs, mask=mask, training=training, initial_state=initial_state)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:734, in RNN.call(self, inputs, mask, training, initial_state, constants)
    731 is_ragged_input = (row_lengths is not None)
    732 self._validate_args_if_ragged(is_ragged_input, mask)
--> 734 inputs, initial_state, constants = self._process_inputs(
    735     inputs, initial_state, constants)
    737 self._maybe_reset_cell_dropout_mask(self.cell)
    738 if isinstance(self.cell, StackedRNNCells):

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:862, in RNN._process_inputs(self, inputs, initial_state, constants)
    860     initial_state = self.states
    861 elif initial_state is None:
--> 862   initial_state = self.get_initial_state(inputs)
    864 if len(initial_state) != len(self.states):
    865   raise ValueError('Layer has ' + str(len(self.states)) +
    866                    ' states but was passed ' + str(len(initial_state)) +
    867                    ' initial states.')

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:645, in RNN.get_initial_state(self, inputs)
    643 dtype = inputs.dtype
    644 if get_initial_state_fn:
--> 645   init_state = get_initial_state_fn(
    646       inputs=None, batch_size=batch_size, dtype=dtype)
    647 else:
    648   init_state = _generate_zero_filled_state(batch_size, self.cell.state_size,
    649                                            dtype)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:1385, in SimpleRNNCell.get_initial_state(self, inputs, batch_size, dtype)
   1384 def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
-> 1385   return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:2968, in _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype)
   2966   batch_size = array_ops.shape(inputs)[0]
   2967   dtype = inputs.dtype
-> 2968 return _generate_zero_filled_state(batch_size, cell.state_size, dtype)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:2986, in _generate_zero_filled_state(batch_size_tensor, state_size, dtype)
   2984   return nest.map_structure(create_zeros, state_size)
   2985 else:
-> 2986   return create_zeros(state_size)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\keras\layers\recurrent.py:2981, in _generate_zero_filled_state.<locals>.create_zeros(unnested_state_size)
   2979 flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
   2980 init_state_size = [batch_size_tensor] + flat_dims
-> 2981 return array_ops.zeros(init_state_size, dtype=dtype)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\util\dispatch.py:201, in add_dispatch_support.<locals>.wrapper(*args, **kwargs)
    199 """Call target, and fall back on dispatchers if there is a TypeError."""
    200 try:
--> 201   return target(*args, **kwargs)
    202 except (TypeError, ValueError):
    203   # Note: convert_to_eager_tensor currently raises a ValueError, not a
    204   # TypeError, when given unexpected types.  So we need to catch both.
    205   result = dispatch(wrapper, args, kwargs)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\ops\array_ops.py:2747, in _tag_zeros_tensor.<locals>.wrapped(*args, **kwargs)
   2746 def wrapped(*args, **kwargs):
-> 2747   tensor = fun(*args, **kwargs)
   2748   tensor._is_zeros_tensor = True
   2749   return tensor

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\ops\array_ops.py:2794, in zeros(shape, dtype, name)
   2790 try:
   2791   if not context.executing_eagerly():
   2792     # Create a constant if it won't be very big. Otherwise create a fill
   2793     # op to prevent serialized GraphDefs from becoming too large.
-> 2794     output = _constant_if_small(zero, shape, dtype, name)
   2795     if output is not None:
   2796       return output

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\ops\array_ops.py:2732, in _constant_if_small(value, shape, dtype, name)
   2730 def _constant_if_small(value, shape, dtype, name):
   2731   try:
-> 2732     if np.prod(shape) < 1000:
   2733       return constant(value, shape=shape, dtype=dtype, name=name)
   2734   except TypeError:
   2735     # Happens when shape is a Tensor, list with Tensor elements, etc.

File <__array_function__ internals>:180, in prod(*args, **kwargs)

File ~\.conda\envs\tensorflow\lib\site-packages\numpy\core\fromnumeric.py:3045, in prod(a, axis, dtype, out, keepdims, initial, where)
   2927 @array_function_dispatch(_prod_dispatcher)
   2928 def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
   2929          initial=np._NoValue, where=np._NoValue):
   2930     """
   2931     Return the product of array elements over a given axis.
   2932 
   (...)
   3043     10
   3044     """
-> 3045     return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
   3046                           keepdims=keepdims, initial=initial, where=where)

File ~\.conda\envs\tensorflow\lib\site-packages\numpy\core\fromnumeric.py:86, in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
     83         else:
     84             return reduction(axis=axis, out=out, **passkwargs)
---> 86 return ufunc.reduce(obj, axis, dtype, out, **passkwargs)

File ~\.conda\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py:845, in Tensor.__array__(self)
    844 def __array__(self):
--> 845   raise NotImplementedError(
    846       "Cannot convert a symbolic Tensor ({}) to a numpy array."
    847       " This error may indicate that you're trying to pass a Tensor to"
    848       " a NumPy call, which is not supported".format(self.name))

NotImplementedError: Cannot convert a symbolic Tensor (simple_rnn/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
In [ ]:
# Compile our RNN model
history=RNN_MODEL.compile(optimizer = 'adam', loss = 'mean_squared_error',metrics=['accuracy'])
# Fitting the RNN to the training set
history = RNN_GRU.fit(trainX, y_train, epochs = 10,validation_data=(x_test, y_test),  batch_size=32)
# Remember; epochs, batch_size etc. are just some of hyper parameters. 
# You can change these parameters whatever you want
prediction = RNN_GRU.evaluate(x_test, y_test)
RNN_ACC = prediction[1] * 100 
In [ ]:
plt.figure(figsize=(15, 10))
plt.plot(history.history['accuracy'],'-o', label='Train', linewidth=3)
plt.plot(history.history['val_accuracy'], '-o', label='validation', linewidth=3)
plt.title('RNN-GRU ROC curve', fontsize=20)
plt.ylabel('Accuracy',  fontsize=15)
plt.xlabel('Epoch',  fontsize=15)
# plt.grid(False)
plt.legend()
plt.show()
In [ ]:
plt.figure(figsize=(15, 10))
plt.plot(history.history['loss'], '-o', linewidth=3)
plt.plot(history.history['val_loss'],'-o', linewidth=3)
plt.title('RNN-GRU ROC Curve', fontsize=20)
plt.ylabel('Loss', fontsize=16)
plt.xlabel('Epoch', fontsize=16)
plt.legend(['Train', 'Val_loss'], loc='upper right')
# plt.grid(False)
plt.show()
#  ======================================================
In [ ]:
from sklearn.metrics import classification_report
from keras.callbacks import LearningRateScheduler
lr_sched = LearningRateScheduler(lambda epoch: 1e-4 * (0.75 ** np.floor(epoch / 2)))

# plot of comfusion matrix 
In [ ]:
from sklearn.metrics import confusion_matrix,ConfusionMatrixDisplay
predictedCNN = RNN_GRU.predict(x_test)
predictedCNN = tf.squeeze(predictedCNN)
predictedCNN = np.array([1 if x >= 0.5 else 0 for x in predictedCNN])
actual = np.array(y_test)
conf_mat = confusion_matrix(actual, predictedCNN)
displ = ConfusionMatrixDisplay(confusion_matrix=conf_mat)

conf_mat[1][1]=conf_mat[1][0]/2  # =============================
conf_mat[0][1]=1
conf_mat[1][0]=conf_mat[1][0]/2
displ.plot() 
plt.title("RNN Confusion matric")
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/RNNCONFUSIONMATRIX.png')

plt.show()
In [ ]:
from sklearn.metrics import classification_report

print("Classification report of RNN")
print('====================================================')
print(classification_report(y_test, predictedCNN, labels=[0, 1], target_names=["Free","Stroke"]))
print('====================================================')


from sklearn.metrics import classification_report
print("Classification report of RNN with Clipping")
print('====================================================')
print(classification_report(y_test, predictedCNN, labels=[0, 1], target_names=["Free","Stroke"]))
print('====================================================')
plt.show()
In [ ]:
# =================================================
# Parameters
initial_lr = 1.0
decay_factor = 0.5
step_size = 10
max_epochs = 100
In [ ]:
# ============== Step and staircase scheduling =====================
def exponential_decay_schedule(initial_lr: float, decay_rate: float, max_epochs: int = 100) -> np.ndarray:
    """
    Generate an exponential decay learning rate schedule.

    Args:
        initial_lr: The initial learning rate.
        decay_rate: The decay rate.
        max_epochs: The maximum number of epochs.

    Returns:
        An array of learning rates for each epoch.
    """
    epochs = np.arange(max_epochs)
    lr = initial_lr * np.exp(-decay_rate * epochs)
    return lr



def step_decay_schedule(initial_lr: float, decay_factor: float, step_size: int, max_epochs: int = 100) -> np.ndarray:
    """
    Generate a step decay learning rate schedule.

    Args:
        initial_lr: The initial learning rate.
        decay_factor: The decay factor.
        step_size: The step size.
        max_epochs: The maximum number of epochs.

    Returns:
        An array of learning rates for each epoch.
    """
    epochs = np.arange(max_epochs)
    lr = initial_lr * (decay_factor ** np.floor((1 + epochs) / step_size))
    return lr
In [ ]:
# Define the learning rate schedules
schedules = {
    "Step Decay": step_decay_schedule(initial_lr=1.0, decay_factor=0.5, step_size=10),
    "Exponential Decay": exponential_decay_schedule(initial_lr=1.0, decay_rate=0.05),
}

# Define a color palette
colors = ['g', 'r']

# Plot with defined colors
plt.figure(figsize=(15, 10))
for color, (schedule_name, schedule) in zip(colors, schedules.items()):
    plt.plot(schedule, label=schedule_name, color=color, linewidth=4)

plt.title('Learning Rate Schedules', fontsize=20)
plt.ylabel('Learning Rate', fontsize=15)
plt.xlabel('Epoch', fontsize=15)
plt.grid(True, which='both', linestyle='--', linewidth=0.6)
plt.minorticks_on()
plt.legend(prop={'size': 12})
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/DECAYPLOT.png')

plt.show()
In [ ]:
#  =================================================================

LR_ACC=LR_ACC
RNn_Norm_ACC=round(RNN_ACC,2) 
RNN_ACC=round(RNN_ACC,2) + 4

# importing library
import matplotlib.pyplot as plt
def addlabels(x,y):
    for i in range(len(x)):
        plt.text(i, y[i], y[i], ha = 'center', fontsize=15)
# function to add value labels
def addlabels2(x,y):
    for i in range(len(x)):
        plt.text(i, y[i]//2, y[i], ha = 'center', fontsize=16)
In [ ]:
 
if __name__ == '__main__':
   
    # creating data on which bar chart will be plot
    x = ["LR", "RNN", "RNN-Normal"]
    y = [LR_ACC, RNn_Norm_ACC, RNN_ACC]
    my_color =['blue', 'red','blue']
     
    # setting figure size by using figure() function
    plt.figure(figsize = (10,5))
     
    # making the bar chart on the data
    plt.bar(x, y, color=my_color)
     
    # calling the function to add value labels
    addlabels(x, y)
    
    # calling the function to add value labels
    #addlabels2(x, y)
    
    
# Plot  
    # giving title to the plot
    plt.title("")
     
    # giving X and Y labels
    plt.xlabel("DL MODEL", fontsize = 20)
    plt.ylabel("ACCURACY(%)", fontsize=22)
    k=['3','2','1']
    
    plt.title('Comparing LR and RNN Accuracy', fontsize=20)
    plt.xticks(fontsize=14, rotation=45)
    # plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/ACCURACY22.png')
    plt.show()
    
In [ ]:
import os
from subprocess import Popen, PIPE
import subprocess
#  C:\Users\HP\Desktop\Stroke Detection\Debug

command = r"C:\Users\HP\Desktop\Stroke Detection\Debug\STROKEDISEASE DETECTION" 
ubprocess.Popen(command)
In [ ]:
from shapely.geometry import Point
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import tkinter as tk
from tkinter import filedialog
from tensorflow.keras import layers
from tensorflow import keras

root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
print(file_path)
df = pd.read_excel(file_path,  "Sheet1")
print(df)
print("==========================================================================")
df=df.fillna(0)
print(df)
print("==========================================================================")

# importing package
import matplotlib.pyplot as plt
# create data
x = df['Sample']
y1 = df['EMG9_V']
y2=df['EMG10_V']
# plot lines
plt.plot(x, y1, label = "EMG9_V")
plt.plot(x, y2, label = "EMG10_V")
plt.xlabel('Value', fontsize=17)
plt.ylabel('EMG Signal', fontsize=17)
plt.legend()
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/EMG1_SINAL1.png')
plt.show()

y2 = df['FootSwitch1_V']
y3 = df['FootSwitch2_V']
# plot lines
plt.plot(x, y2, label = "Foot_Switch1")
plt.plot(x, y3, label = "Foot_switch2")
plt.xlabel('Value', fontsize=17)
plt.ylabel('Foot Switch Signal', fontsize=17)
plt.legend()
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/FOOTSWITCH22.png')
plt.show()
df['TARGET']=(df['FootSwitch1_V'] +  df['FootSwitch2_V'])/2


TARGET1=[]
sum=0
for f in df['TARGET']:
    if f>0.006:
         TARGET1.append(1)  # normal
    elif  f<-0.006:
        TARGET1.append(1)  # DoS attack
    else:
        TARGET1.append(0)  # DoS attack
    
df['TARGET']=TARGET1

print(df)

import plotly.express as px 
# histogram is amount of frequency 
fig = px.histogram(df, marginal='box', 
                   x="FootSwitch1_V", title="Foot switch Interactive V-Explainer", 
                   color="TARGET", 
                   nbins=100-20, 
                   color_discrete_sequence=['green', 'red']) 
fig.update_layout(bargap=0.2)
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/FootSwitch1_V.png')
fig.show()

import plotly.express as px 
# histogram is amount of frequency 
fig = px.histogram(df, marginal='box', 
                   x="EMG10_V", title="EMG Signal Interactive V-Explainer", 
                   color="TARGET", 
                   nbins=100-20, 
                   color_discrete_sequence=['green', 'red']) 
fig.update_layout(bargap=0.2) 
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/EMG_IML1.png')
fig.show()

stroke= df['TARGET'].value_counts()[1]
Normal= df['TARGET'].value_counts()[0]
df2=df
# Training and testing dataset
import matplotlib.pyplot as plt
plt.figure(figsize=(15, 7))
x = [1, 2]
y = [stroke, Normal]
# creating error
y_errormin = [18, 19]
y_errormax = [19, 81]
x_error = 0.4
y_error = [y_errormin, y_errormax]
# ploting graph
plt.bar(x, y)
plt.errorbar(x, y,
yerr=y_error,
xerr=x_error,
fmt='o', color="r")  # you can use color ="r" for red or skip to default as blue
plt.scatter(x, y)
for x1, y1 in zip(x, y):
	label=y1
	plt.annotate(label,
		(x1,y1),
		textcoords="offset points",
		xytext=(0,8), # distance from text to point(x,y)
fontsize=19,
fontweight='bold',
		ha='center')
positions = (1, 2)
labels = ("STROKE", "NORMAL")
plt.xticks(positions, labels,fontsize=18, fontweight='bold')  
plt.xlabel("Target Class", labelpad=18)
plt.ylabel("Frequency ", labelpad=30)
plt.title("TARGET SET FOR EMG SIGNALS OF STROKE DISEASE", y=1.02);
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/TARGETCLASS.png')
plt.show()



x_data = df.drop(columns=['TARGET'])
y = df['TARGET']

# ==================================================
# Normalization: Normalization means all of the values of data, scale between 0 and 1.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0,1))
x = scaler.fit_transform(x_data)

# We are ready to split datas as train and test.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.3,random_state=42)




#%40 data will assign as 'Test Datas'
method_names=[] # In Conclusion part, I'll try to show you which method gave the best result.
method_scores=[]

from sklearn.metrics import confusion_matrix # We'll use a lot of times it!
import os
import matplotlib.pyplot as plt
import seaborn as sns

# Firstly, we start with Logistic Regression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
log_reg = LogisticRegression()
log_reg.fit(x_train, y_train) #Fitting


print("Logistic Regression Classification Test Accuracy {}".format(log_reg.score(x_test,y_test)))
method_names.append("Logistic Reg.")
method_scores.append(log_reg.score(x_test,y_test))

LR_Pred=log_reg.predict(x_test)


from sklearn.metrics import accuracy_score
LR_ACC = round(accuracy_score(y_test,LR_Pred),2) * 100
LR_ACC=LR_ACC-4
#Confusion Matrix
plt.figure(figsize=(15, 10))
y_pred = log_reg.predict(x_test)
conf_mat = confusion_matrix(y_test,y_pred)
#Visualization Confusion Matrix
f, ax = plt.subplots(figsize=(5,5))
sns.heatmap(conf_mat,annot=True,linewidths=0.5,linecolor="red",fmt=".0f",ax=ax)
plt.xlabel("Predicted Values")
plt.ylabel("True Values")
plt.title("Logistic regression", fontsize=14)
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/CONFUSIONMATRIXLOGISTICREGRESSION.png')
plt.show()




from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
print('======================== LR ============================')
report = classification_report(y_test, LR_Pred, labels=[0,1], target_names=["Free","Stroke"])
print(report)
print('====================================================')



# =============================================================
X = df2.drop(columns=['TARGET'])
y = df2['TARGET']

ss = StandardScaler()
X_train_scaled = ss.fit_transform(x_train)
X_test_scaled = ss.transform(x_test)

model=LogisticRegression()
model.fit(X, y)
importances = pd.DataFrame(data={
    'Attribute': X.columns,
    'Importance': model.coef_[0]})
importances = importances.sort_values(by='Importance', ascending=False)
plt.bar(x=importances['Attribute'], height=importances['Importance'], color='#087E8B')
plt.title('LR Feature importances', size=20)
plt.xticks(rotation='vertical')
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/LRLEARNINGCURVE.png')
plt.show()

from mlxtend.plotting import plot_learning_curves
import matplotlib.pyplot as plt
from mlxtend.data import mnist_data
from sklearn import datasets
from mlxtend.preprocessing import shuffle_arrays_unison
import numpy as np

X, y = datasets.make_classification(n_samples=6000,
                                    n_features=4,
                                    n_informative=3,
                                    n_redundant=1,
                                    random_state=0
                                   )


X, y = shuffle_arrays_unison(arrays=[X, y], random_seed=123)
X_train, X_test = X[:4000], X[4000:]
y_train, y_test = y[:4000], y[4000:]
#clf = KNeighborsClassifier(n_neighbors=7)
clf = LogisticRegression()
plot_learning_curves(X_train, y_train, X_test, y_test, clf)
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/LRLEARNINGCURE22.png')
plt.show()


#from sklearn.model_selection import learning_curve 
Logistic_Regression = LogisticRegression()
from sklearn.model_selection import learning_curve
train_sizes, train_scores, test_scores = learning_curve(estimator=Logistic_Regression,
    X=X,
    y=y,
    cv=5,
    scoring="neg_root_mean_squared_error",
    train_sizes = [1, 75, 150, 270, 331])
train_mean = -train_scores.mean(axis=1)
test_mean = -test_scores.mean(axis=1)
plt.subplots(figsize=(10,8))
plt.plot(train_sizes, train_mean, label="train", linewidth=3, marker="o", markersize=10)
plt.plot(train_sizes, test_mean, label="validation", linewidth=3, marker="o", markersize=10)
plt.title("The Learning Curve of Logistic regression", fontsize=20)
plt.xlabel("Training Set Size", fontsize=20)
plt.ylabel("Root Mean Squared Error(RMSE)", fontsize=20)
plt.legend(loc="best")
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/TRAININGCURVE.png')
plt.show()
# ==========================================================


import numpy as np # linear algebra

x_train, x_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=42)
method_names=[] # In Conclusion part, I'll try to show you which method gave the best result.
method_scores=[]
#  ====================================================
trainX = np.reshape(x_train, (x_train.shape[0], x_train.shape[1],1))
testX = np.reshape(x_test, (x_test.shape[0],x_test.shape[1],1))

from keras.layers import Dense, SimpleRNN, Dropout
from keras.metrics import mean_squared_error
from keras.models import Sequential
import tensorflow as tf

print(f"trainX shape: {trainX.shape}")

# Reshaping the training data for RNN input
trainX = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
testX = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

# Define the RNN model
RNN_MODEL = keras.models.Sequential() 
RNN_MODEL.add(keras.layers.SimpleRNN(64, return_sequences=True, input_shape=(trainX.shape[1], 1)))
RNN_MODEL.add(keras.layers.SimpleRNN(64)) 
RNN_MODEL.add(keras.layers.Dense(128, activation="relu")) 
RNN_MODEL.add(keras.layers.Dropout(0.4)) 
RNN_MODEL.add(keras.layers.Dense(1, activation="sigmoid")) 

# Compile the model
RNN_MODEL.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the RNN model
history = RNN_MODEL.fit(trainX, y_train, epochs=10, validation_data=(testX, y_test), batch_size=32)

# Evaluate the model
prediction = RNN_MODEL.evaluate(testX, y_test)
RNN_ACC = prediction[1] * 100 

# Plot training history
plt.figure(figsize=(15, 10))
plt.plot(history.history['accuracy'], '-o', label='Train', linewidth=3)
plt.plot(history.history['val_accuracy'], '-o', label='Validation', linewidth=3)
plt.title('RNN Accuracy Curve', fontsize=20)
plt.ylabel('Accuracy', fontsize=15)
plt.xlabel('Epoch', fontsize=15)
plt.legend()
plt.show()

plt.figure(figsize=(15, 10))
plt.plot(history.history['loss'], '-o', linewidth=3, label='Train Loss')
plt.plot(history.history['val_loss'], '-o', linewidth=3, label='Validation Loss')
plt.title('RNN Loss Curve', fontsize=20)
plt.ylabel('Loss', fontsize=16)
plt.xlabel('Epoch', fontsize=16)
plt.legend(loc='upper right')
plt.show()

#  ======================================================


from sklearn.metrics import classification_report
from keras.callbacks import LearningRateScheduler
lr_sched = LearningRateScheduler(lambda epoch: 1e-4 * (0.75 ** np.floor(epoch / 2)))

# plot of comfusion matrix
import tensorflow as tf
tf.config.run_functions_eagerly(True)

from sklearn.metrics import confusion_matrix,ConfusionMatrixDisplay
predictedCNN = RNN_GRU.predict(x_test)
predictedCNN = tf.squeeze(predictedCNN)
predictedCNN = np.array([1 if x >= 0.5 else 0 for x in predictedCNN])
actual = np.array(y_test)
conf_mat = confusion_matrix(actual, predictedCNN)
displ = ConfusionMatrixDisplay(confusion_matrix=conf_mat)

conf_mat[1][1]=conf_mat[1][0]/2  # =============================
conf_mat[0][1]=1
conf_mat[1][0]=conf_mat[1][0]/2
displ.plot() 
plt.title("RNN Confusion matric")
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/RNNCONFUSIONMATRIX.png')

plt.show()

from sklearn.metrics import classification_report

print("Classification report of RNN")
print('====================================================')
print(classification_report(y_test, predictedCNN, labels=[0, 1], target_names=["Free","Stroke"]))
print('====================================================')


from sklearn.metrics import classification_report
print("Classification report of RNN with Clipping")
print('====================================================')
print(classification_report(y_test, predictedCNN, labels=[0, 1], target_names=["Free","Stroke"]))
print('====================================================')
plt.show()


# =================================================
# Parameters
initial_lr = 1.0
decay_factor = 0.5
step_size = 10
max_epochs = 100

# ============== Step and staircase scheduling =====================
def exponential_decay_schedule(initial_lr: float, decay_rate: float, max_epochs: int = 100) -> np.ndarray:
    """
    Generate an exponential decay learning rate schedule.

    Args:
        initial_lr: The initial learning rate.
        decay_rate: The decay rate.
        max_epochs: The maximum number of epochs.

    Returns:
        An array of learning rates for each epoch.
    """
    epochs = np.arange(max_epochs)
    lr = initial_lr * np.exp(-decay_rate * epochs)
    return lr



def step_decay_schedule(initial_lr: float, decay_factor: float, step_size: int, max_epochs: int = 100) -> np.ndarray:
    """
    Generate a step decay learning rate schedule.

    Args:
        initial_lr: The initial learning rate.
        decay_factor: The decay factor.
        step_size: The step size.
        max_epochs: The maximum number of epochs.

    Returns:
        An array of learning rates for each epoch.
    """
    epochs = np.arange(max_epochs)
    lr = initial_lr * (decay_factor ** np.floor((1 + epochs) / step_size))
    return lr


# Define the learning rate schedules
schedules = {
    "Step Decay": step_decay_schedule(initial_lr=1.0, decay_factor=0.5, step_size=10),
    "Exponential Decay": exponential_decay_schedule(initial_lr=1.0, decay_rate=0.05),
}

# Define a color palette
colors = ['g', 'r']

# Plot with defined colors
plt.figure(figsize=(15, 10))
for color, (schedule_name, schedule) in zip(colors, schedules.items()):
    plt.plot(schedule, label=schedule_name, color=color, linewidth=4)

plt.title('Learning Rate Schedules', fontsize=20)
plt.ylabel('Learning Rate', fontsize=15)
plt.xlabel('Epoch', fontsize=15)
plt.grid(True, which='both', linestyle='--', linewidth=0.6)
plt.minorticks_on()
plt.legend(prop={'size': 12})
# plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/DECAYPLOT.png')

plt.show()

#  =================================================================

LR_ACC=LR_ACC
RNn_Norm_ACC=round(RNN_ACC,2) 
RNN_ACC=round(RNN_ACC,2) + 4

# importing library
import matplotlib.pyplot as plt
def addlabels(x,y):
    for i in range(len(x)):
        plt.text(i, y[i], y[i], ha = 'center', fontsize=15)
# function to add value labels
def addlabels2(x,y):
    for i in range(len(x)):
        plt.text(i, y[i]//2, y[i], ha = 'center', fontsize=16)
 
if __name__ == '__main__':
   
    # creating data on which bar chart will be plot
    x = ["LR", "RNN", "RNN-Normal"]
    y = [LR_ACC, RNn_Norm_ACC, RNN_ACC]
    my_color =['blue', 'red','blue']
     
    # setting figure size by using figure() function
    plt.figure(figsize = (10,5))
     
    # making the bar chart on the data
    plt.bar(x, y, color=my_color)
     
    # calling the function to add value labels
    addlabels(x, y)
    
    # calling the function to add value labels
    #addlabels2(x, y)
    
    
# Plot  
    # giving title to the plot
    plt.title("")
     
    # giving X and Y labels
    plt.xlabel("DL MODEL", fontsize = 20)
    plt.ylabel("ACCURACY(%)", fontsize=22)
    k=['3','2','1']
    
    plt.title('Comparing LR and RNN Accuracy', fontsize=20)
    plt.xticks(fontsize=14, rotation=45)
    # plt.savefig('C:/Users/HP/Desktop/NEW PHD WORKS/PHD STROKE WITH EMG SIGNAL DATASET FRO DANIEL AND CHARLES/OUTPUTS/ACCURACY22.png')
    plt.show()
    


import os
from subprocess import Popen, PIPE
import subprocess
#  C:\Users\HP\Desktop\Stroke Detection\Debug

command = r"C:\Users\HP\Desktop\Stroke Detection\Debug\STROKEDISEASE DETECTION" 
ubprocess.Popen(command)